wall_clock_time.tv_usec));
/* Reload the timer. */
- again:
update_timer.expires = new_st + MILLISECS(200);
- if(add_ac_timer(&update_timer) == 1)
- goto again;
+ add_ac_timer(&update_timer);
}
/***************************************************************************
wctime_st = NOW();
/* start timer to update time periodically */
- init_ac_timer(&update_timer);
+ init_ac_timer(&update_timer, 0);
update_timer.function = &update_time;
update_time(0);
#include <xeno/sched.h>
#include <xeno/lib.h>
#include <xeno/smp.h>
-
#include <xeno/perfc.h>
-
#include <xeno/time.h>
+#include <xeno/interrupt.h>
#include <xeno/ac_timer.h>
#include <xeno/keyhandler.h>
-
#include <asm/system.h>
#include <asm/desc.h>
} __cacheline_aligned ac_timers_t;
static ac_timers_t ac_timers[NR_CPUS];
-/* local prototypes */
-static int detach_ac_timer(struct ac_timer *timer);
-
/*****************************************************************************
* add a timer.
- * return value:
- * 0: success
- * 1: failure, timer in the past or timeout value to small
- * -1: failure, timer uninitialised
- * fail
+ * return value: CPU mask of remote processors to send an event to
*****************************************************************************/
-int add_ac_timer(struct ac_timer *timer)
+static inline unsigned long __add_ac_timer(struct ac_timer *timer)
{
- int cpu = smp_processor_id();
- unsigned long flags;
- s_time_t now;
-
- /* make sure timeout value is in the future */
-
- now = NOW();
- if (timer->expires <= now) {
- TRC(printk("ACT[%02d] add_ac_timer:now=0x%08X%08X>expire=0x%08X%08X\n",
- cpu, (u32)(now>>32), (u32)now,
- (u32)(timer->expires>>32), (u32)timer->expires));
- return 1;
- }
-
- spin_lock_irqsave(&ac_timers[cpu].lock, flags);
+ int cpu = timer->cpu;
/*
- * Add timer to the list. If it gets added to the front we have to
- * reprogramm the timer
+ * Add timer to the list. If it gets added to the front we schedule
+ * a softirq. This will reprogram the timer, or handle the timer event
+ * imemdiately, depending on whether alarm is sufficiently ahead in the
+ * future.
*/
if (list_empty(&ac_timers[cpu].timers)) {
- if (!reprogram_ac_timer(timer->expires)) {
- printk("ACT[%02d] add at head failed\n", cpu);
- spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
- return 1; /* failed */
- }
list_add(&timer->timer_list, &ac_timers[cpu].timers);
+ goto send_softirq;
} else {
struct list_head *pos;
struct ac_timer *t;
if (t->expires > timer->expires)
break;
}
- list_add (&(timer->timer_list), pos->prev);
-
- if (timer->timer_list.prev == &ac_timers[cpu].timers) {
- /* added at head */
- if (!reprogram_ac_timer(timer->expires)) {
- printk("ACT[%02d] add at head failed\n", cpu);
- detach_ac_timer(timer);
- spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
- return 1; /* failed */
- }
- }
+ list_add(&(timer->timer_list), pos->prev);
+
+ if (timer->timer_list.prev == &ac_timers[cpu].timers)
+ goto send_softirq;
}
- spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
+
return 0;
+
+ send_softirq:
+ __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
+ return (cpu != smp_processor_id()) ? 1<<cpu : 0;
}
+/*
+ * Add a timer to its target CPU's pending list; if the insert requires a
+ * reprogram, a softirq is raised and any remote CPU in the returned mask is
+ * kicked with an event-check IPI.
+ */
+void add_ac_timer(struct ac_timer *timer)
+{
+    int cpu;
+    unsigned long flags, cpu_mask;
+
+    /* Validate the pointer BEFORE dereferencing it for the CPU index. */
+    ASSERT(timer != NULL);
+    cpu = timer->cpu;
+
+    spin_lock_irqsave(&ac_timers[cpu].lock, flags);
+    ASSERT(!active_ac_timer(timer));
+    cpu_mask = __add_ac_timer(timer);
+    spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
+
+    if ( cpu_mask ) smp_send_event_check_mask(cpu_mask);
+}
+
+
/*****************************************************************************
* detach a timer (no locking)
* return values:
* 0: success
* -1: bogus timer
*****************************************************************************/
-static int detach_ac_timer(struct ac_timer *timer)
+static inline void detach_ac_timer(struct ac_timer *timer)
{
TRC(printk("ACT [%02d] detach(): \n", cpu));
list_del(&timer->timer_list);
timer->timer_list.next = NULL;
- return 0;
}
+
/*****************************************************************************
* remove a timer
- * return values:
- * 0: success
- * -1: bogus timer
+ * return values: CPU mask of remote processors to send an event to
*****************************************************************************/
-int rem_ac_timer(struct ac_timer *timer)
+static inline unsigned long __rem_ac_timer(struct ac_timer *timer)
{
- int cpu = smp_processor_id();
- int res = 0;
- unsigned long flags;
+ int cpu = timer->cpu;
TRC(printk("ACT [%02d] remove(): timo=%lld \n", cpu, timer->expires));
- spin_lock_irqsave(&ac_timers[cpu].lock, flags);
- if (timer->timer_list.next) {
- res = detach_ac_timer(timer);
-
- if (timer->timer_list.prev == &ac_timers[cpu].timers) {
- /* just removed the head */
- if (list_empty(&ac_timers[cpu].timers)) {
- reprogram_ac_timer((s_time_t) 0);
- } else {
- timer = list_entry(ac_timers[cpu].timers.next,
- struct ac_timer, timer_list);
- if ( timer->expires > (NOW() + TIMER_SLOP) )
- reprogram_ac_timer(timer->expires);
- }
+ ASSERT(timer->timer_list.next);
+
+ detach_ac_timer(timer);
+
+ if (timer->timer_list.prev == &ac_timers[cpu].timers) {
+ /* just removed the head */
+ if (list_empty(&ac_timers[cpu].timers)) {
+ goto send_softirq;
+ } else {
+ timer = list_entry(ac_timers[cpu].timers.next,
+ struct ac_timer, timer_list);
+ if ( timer->expires > (NOW() + TIMER_SLOP) )
+ goto send_softirq;
}
- } else
- res = -1;
+ }
+ return 0;
+
+ send_softirq:
+ __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
+ return (cpu != smp_processor_id()) ? 1<<cpu : 0;
+}
+
+/*
+ * Remove a timer from its CPU's list. Removing an inactive timer is a no-op;
+ * removing the head may require a reprogram via the AC_TIMER softirq.
+ */
+void rem_ac_timer(struct ac_timer *timer)
+{
+    int cpu;
+    unsigned long flags, cpu_mask = 0;
+
+    /* Validate the pointer BEFORE dereferencing it for the CPU index. */
+    ASSERT(timer != NULL);
+    cpu = timer->cpu;
+
+    spin_lock_irqsave(&ac_timers[cpu].lock, flags);
+    if ( active_ac_timer(timer) )
+        cpu_mask = __rem_ac_timer(timer);
     spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
-    return res;
+    if ( cpu_mask ) smp_send_event_check_mask(cpu_mask);
 }
+
/*****************************************************************************
* modify a timer, i.e., set a new timeout value
* return value:
* 0: sucess
- * -1: error
+ * 1: timeout error
+ * -1: bogus timer
*****************************************************************************/
-int mod_ac_timer(struct ac_timer *timer, s_time_t new_time)
+/*
+ * Atomically retarget a timer to a new expiry: remove (if active) and
+ * re-insert under a single lock hold, then kick any remote CPUs needed.
+ */
+void mod_ac_timer(struct ac_timer *timer, s_time_t new_time)
 {
-    if (rem_ac_timer(timer) != 0)
-        return -1;
+    int cpu;
+    unsigned long flags, cpu_mask = 0;
+
+    /* Validate the pointer BEFORE dereferencing it for the CPU index. */
+    ASSERT(timer != NULL);
+    cpu = timer->cpu;
+
+    spin_lock_irqsave(&ac_timers[cpu].lock, flags);
+
+    if ( active_ac_timer(timer) )
+        cpu_mask = __rem_ac_timer(timer);
     timer->expires = new_time;
-    if (add_ac_timer(timer) != 0)
-        return -1;
-    return 0;
+    cpu_mask |= __add_ac_timer(timer);
+
+    spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
+
+    if ( cpu_mask ) smp_send_event_check_mask(cpu_mask);
 }
+
/*****************************************************************************
* do_ac_timer
* deal with timeouts and run the handlers
spin_lock_irqsave(&ac_timers[cpu].lock, flags);
do_timer_again:
-
TRC(printk("ACT [%02d] do(): now=%lld\n", cpu, NOW()));
/* Sanity: is the timer list empty? */
- if ( list_empty(&ac_timers[cpu].timers) ) {
- /* This does sometimes happen: race condition in resetting timeout? */
- spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
- return;
- }
+ if ( list_empty(&ac_timers[cpu].timers) ) goto out;
/* Handle all timeouts in the near future. */
while ( !list_empty(&ac_timers[cpu].timers) )
t = list_entry(ac_timers[cpu].timers.next,struct ac_timer, timer_list);
if ( t->expires > (NOW() + TIMER_SLOP) ) break;
+ ASSERT(t->cpu == cpu);
+
/* do some stats */
diff = (now - t->expires);
if (diff > 0x7fffffff) diff = 0x7fffffff; /* THIS IS BAD! */
if ( !list_empty(&ac_timers[cpu].timers) )
{
t = list_entry(ac_timers[cpu].timers.next,struct ac_timer, timer_list);
- if ( t->expires > 0 )
+ TRC(printk("ACT [%02d] do(): reprog timo=%lld\n",cpu,t->expires));
+ if ( !reprogram_ac_timer(t->expires) )
{
- TRC(printk("ACT [%02d] do(): reprog timo=%lld\n",cpu,t->expires));
- if ( !reprogram_ac_timer(t->expires) )
- {
- TRC(printk("ACT [%02d] do(): again\n", cpu));
- goto do_timer_again;
- }
+ TRC(printk("ACT [%02d] do(): again\n", cpu));
+ goto do_timer_again;
}
} else {
reprogram_ac_timer((s_time_t) 0);
}
+ out:
spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
TRC(printk("ACT [%02d] do(): end\n", cpu));
}
+
/*****************************************************************************
* debug dump_queue
* arguments: queue head, name of queue
return;
}
+
+/*
+ * AC_TIMER softirq handler: reprogram the hardware timer for the earliest
+ * pending expiry on this CPU, or run expired handlers immediately.
+ */
+static void ac_timer_softirq_action(struct softirq_action *a)
+{
+    int cpu = smp_processor_id();
+    unsigned long flags;
+    struct ac_timer *t;
+    struct list_head *tlist;
+
+    spin_lock_irqsave(&ac_timers[cpu].lock, flags);
+
+    tlist = &ac_timers[cpu].timers;
+    if ( list_empty(tlist) )
+    {
+        /* Nothing pending: disarm the timer. */
+        reprogram_ac_timer((s_time_t)0);
+        spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
+        return;
+    }
+
+    /*
+     * The earliest timer is the first real entry, i.e. tlist->next.
+     * (tlist itself is the list head, not an ac_timer.)
+     */
+    t = list_entry(tlist->next, struct ac_timer, timer_list);
+
+    if ( (t->expires < (NOW() + TIMER_SLOP)) ||
+         !reprogram_ac_timer(t->expires) )
+    {
+        /*
+         * Timer handler needs protecting from local APIC interrupts, but takes
+         * the spinlock itself, so we release that before calling in.
+         */
+        spin_unlock(&ac_timers[cpu].lock);
+        do_ac_timer();
+        local_irq_restore(flags);
+    }
+    else
+    {
+        /* Reprogram succeeded: must still drop the lock and re-enable IRQs. */
+        spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
+    }
+}
+
+
void dump_timerq(u_char key, void *dev_id, struct pt_regs *regs)
{
u_long flags;
printk ("ACT: Initialising Accurate timers\n");
+ open_softirq(AC_TIMER_SOFTIRQ, ac_timer_softirq_action, NULL);
+
for (i = 0; i < NR_CPUS; i++)
{
INIT_LIST_HEAD(&ac_timers[i].timers);
}
}
+
/*****************************************************************************
* GRAVEYARD
*****************************************************************************/
sched_done:
ASSERT(r_time >= ctx_allow);
-#ifndef NDEBUG
- if (r_time < ctx_allow) {
- printk("[%02d]: %lx\n", this_cpu, r_time);
- dump_rqueue(&schedule_data[this_cpu].runqueue, "foo");
- }
-#endif
-
prev->has_cpu = 0;
next->has_cpu = 1;
next->lastschd = now;
/* reprogramm the timer */
- timer_redo:
schedule_data[this_cpu].s_timer.expires = now + r_time;
- if (add_ac_timer(&schedule_data[this_cpu].s_timer) == 1) {
- printk("SCHED[%02d]: timeout already happened! r_time=%u\n",
- this_cpu, r_time);
- now = NOW();
- goto timer_redo;
- }
+ add_ac_timer(&schedule_data[this_cpu].s_timer);
spin_unlock_irq(&schedule_data[this_cpu].lock);
unsigned long cpu_mask = 0;
struct task_struct *p;
s_time_t now;
- int res;
/* send virtual timer interrupt */
read_lock(&tasklist_lock);
read_unlock(&tasklist_lock);
guest_event_notify(cpu_mask);
- again:
now = NOW();
v_timer.expires = now + MILLISECS(10);
- res=add_ac_timer(&v_timer);
- if (res==1)
- goto again;
+ add_ac_timer(&v_timer);
}
/*
schedule_data[i].curr = &idle0_task;
/* a timer for each CPU */
- init_ac_timer(&schedule_data[i].s_timer);
+ init_ac_timer(&schedule_data[i].s_timer, i);
schedule_data[i].s_timer.function = &sched_timer;
}
schedule_data[0].idle = &idle0_task; /* idle on CPU 0 is special */
- init_ac_timer(&v_timer);
+ init_ac_timer(&v_timer, 0);
v_timer.function = &virt_timer;
}
asmlinkage void do_softirq()
{
- int cpu = smp_processor_id();
- __u32 pending;
- long flags;
-
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
-
- pending = softirq_pending(cpu);
-
- while (pending) {
- struct softirq_action *h;
-
- local_bh_disable();
-restart:
- /* Reset the pending bitmask before enabling irqs */
- softirq_pending(cpu) = 0;
-
- local_irq_enable();
-
- h = softirq_vec;
-
- do {
- if (pending & 1)
- h->action(h);
- h++;
- pending >>= 1;
- } while (pending);
-
- local_irq_disable();
-
- pending = softirq_pending(cpu);
- if (pending) goto restart;
- __local_bh_enable();
- }
-
- local_irq_restore(flags);
+ int cpu = smp_processor_id();
+ struct softirq_action *h;
+ __u32 pending;
+ long flags;
+
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+
+ pending = xchg(&softirq_pending(cpu), 0);
+ if ( !pending ) goto out;
+
+ local_bh_disable();
+
+ do {
+ local_irq_enable();
+
+ h = softirq_vec;
+
+ do {
+ if (pending & 1)
+ h->action(h);
+ h++;
+ pending >>= 1;
+ } while (pending);
+
+ local_irq_disable();
+
+ pending = xchg(&softirq_pending(cpu), 0);
+ } while ( pending );
+
+ __local_bh_enable();
+
+out:
+ local_irq_restore(flags);
}
/*
*/
inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
- __cpu_raise_softirq(cpu, nr);
+ __cpu_raise_softirq(cpu, nr);
#ifdef CONFIG_SMP
- if ( cpu != smp_processor_id() )
- smp_send_event_check_cpu(cpu);
+ if ( cpu != smp_processor_id() )
+ smp_send_event_check_cpu(cpu);
#endif
}
void raise_softirq(unsigned int nr)
{
- long flags;
+ long flags;
- local_irq_save(flags);
- cpu_raise_softirq(smp_processor_id(), nr);
- local_irq_restore(flags);
+ local_irq_save(flags);
+ cpu_raise_softirq(smp_processor_id(), nr);
+ local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
- softirq_vec[nr].data = data;
- softirq_vec[nr].action = action;
+ softirq_vec[nr].data = data;
+ softirq_vec[nr].action = action;
}
void __tasklet_schedule(struct tasklet_struct *t)
{
- int cpu = smp_processor_id();
- unsigned long flags;
-
- local_irq_save(flags);
- t->next = tasklet_vec[cpu].list;
- tasklet_vec[cpu].list = t;
- cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
- local_irq_restore(flags);
+ int cpu = smp_processor_id();
+ unsigned long flags;
+
+ local_irq_save(flags);
+ t->next = tasklet_vec[cpu].list;
+ tasklet_vec[cpu].list = t;
+ cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ local_irq_restore(flags);
}
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
- int cpu = smp_processor_id();
- unsigned long flags;
-
- local_irq_save(flags);
- t->next = tasklet_hi_vec[cpu].list;
- tasklet_hi_vec[cpu].list = t;
- cpu_raise_softirq(cpu, HI_SOFTIRQ);
- local_irq_restore(flags);
+ int cpu = smp_processor_id();
+ unsigned long flags;
+
+ local_irq_save(flags);
+ t->next = tasklet_hi_vec[cpu].list;
+ tasklet_hi_vec[cpu].list = t;
+ cpu_raise_softirq(cpu, HI_SOFTIRQ);
+ local_irq_restore(flags);
}
static void tasklet_action(struct softirq_action *a)
{
- int cpu = smp_processor_id();
- struct tasklet_struct *list;
-
- local_irq_disable();
- list = tasklet_vec[cpu].list;
- tasklet_vec[cpu].list = NULL;
- local_irq_enable();
-
- while (list) {
- struct tasklet_struct *t = list;
-
- list = list->next;
-
- if (tasklet_trylock(t)) {
- if (!atomic_read(&t->count)) {
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- BUG();
- t->func(t->data);
- }
- tasklet_unlock(t);
- continue;
- }
-
- local_irq_disable();
- t->next = tasklet_vec[cpu].list;
- tasklet_vec[cpu].list = t;
- __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
- local_irq_enable();
- }
+ int cpu = smp_processor_id();
+ struct tasklet_struct *list;
+
+ local_irq_disable();
+ list = tasklet_vec[cpu].list;
+ tasklet_vec[cpu].list = NULL;
+ local_irq_enable();
+
+ while (list) {
+ struct tasklet_struct *t = list;
+
+ list = list->next;
+
+ if (tasklet_trylock(t)) {
+ if (!atomic_read(&t->count)) {
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ BUG();
+ t->func(t->data);
+ }
+ tasklet_unlock(t);
+ continue;
+ }
+
+ local_irq_disable();
+ t->next = tasklet_vec[cpu].list;
+ tasklet_vec[cpu].list = t;
+ __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ local_irq_enable();
+ }
}
static void tasklet_hi_action(struct softirq_action *a)
{
- int cpu = smp_processor_id();
- struct tasklet_struct *list;
-
- local_irq_disable();
- list = tasklet_hi_vec[cpu].list;
- tasklet_hi_vec[cpu].list = NULL;
- local_irq_enable();
-
- while (list) {
- struct tasklet_struct *t = list;
-
- list = list->next;
-
- if (tasklet_trylock(t)) {
- if (!atomic_read(&t->count)) {
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- BUG();
- t->func(t->data);
- }
- tasklet_unlock(t);
- continue;
- }
-
- local_irq_disable();
- t->next = tasklet_hi_vec[cpu].list;
- tasklet_hi_vec[cpu].list = t;
- __cpu_raise_softirq(cpu, HI_SOFTIRQ);
- local_irq_enable();
- }
+ int cpu = smp_processor_id();
+ struct tasklet_struct *list;
+
+ local_irq_disable();
+ list = tasklet_hi_vec[cpu].list;
+ tasklet_hi_vec[cpu].list = NULL;
+ local_irq_enable();
+
+ while (list) {
+ struct tasklet_struct *t = list;
+
+ list = list->next;
+
+ if (tasklet_trylock(t)) {
+ if (!atomic_read(&t->count)) {
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ BUG();
+ t->func(t->data);
+ }
+ tasklet_unlock(t);
+ continue;
+ }
+
+ local_irq_disable();
+ t->next = tasklet_hi_vec[cpu].list;
+ tasklet_hi_vec[cpu].list = t;
+ __cpu_raise_softirq(cpu, HI_SOFTIRQ);
+ local_irq_enable();
+ }
}
void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data)
{
- t->next = NULL;
- t->state = 0;
- atomic_set(&t->count, 0);
- t->func = func;
- t->data = data;
+ t->next = NULL;
+ t->state = 0;
+ atomic_set(&t->count, 0);
+ t->func = func;
+ t->data = data;
}
void tasklet_kill(struct tasklet_struct *t)
{
- if (in_interrupt())
- printk("Attempt to kill tasklet from interrupt\n");
-
- while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
- set_current_state(TASK_RUNNING);
- do {
- current->policy |= SCHED_YIELD;
- schedule();
- } while (test_bit(TASKLET_STATE_SCHED, &t->state));
- }
- tasklet_unlock_wait(t);
- clear_bit(TASKLET_STATE_SCHED, &t->state);
+ if (in_interrupt())
+ printk("Attempt to kill tasklet from interrupt\n");
+
+ while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ set_current_state(TASK_RUNNING);
+ do {
+ current->policy |= SCHED_YIELD;
+ schedule();
+ } while (test_bit(TASKLET_STATE_SCHED, &t->state));
+ }
+ tasklet_unlock_wait(t);
+ clear_bit(TASKLET_STATE_SCHED, &t->state);
}
static void bh_action(unsigned long nr)
{
- int cpu = smp_processor_id();
+ int cpu = smp_processor_id();
- if (!spin_trylock(&global_bh_lock))
- goto resched;
+ if (!spin_trylock(&global_bh_lock))
+ goto resched;
- if (!hardirq_trylock(cpu))
- goto resched_unlock;
+ if (!hardirq_trylock(cpu))
+ goto resched_unlock;
- if (bh_base[nr])
- bh_base[nr]();
+ if (bh_base[nr])
+ bh_base[nr]();
- hardirq_endlock(cpu);
- spin_unlock(&global_bh_lock);
- return;
+ hardirq_endlock(cpu);
+ spin_unlock(&global_bh_lock);
+ return;
-resched_unlock:
- spin_unlock(&global_bh_lock);
-resched:
- mark_bh(nr);
+ resched_unlock:
+ spin_unlock(&global_bh_lock);
+ resched:
+ mark_bh(nr);
}
void init_bh(int nr, void (*routine)(void))
{
- bh_base[nr] = routine;
- mb();
+ bh_base[nr] = routine;
+ mb();
}
void remove_bh(int nr)
{
- tasklet_kill(bh_task_vec+nr);
- bh_base[nr] = NULL;
+ tasklet_kill(bh_task_vec+nr);
+ bh_base[nr] = NULL;
}
void __init softirq_init()
{
- int i;
+ int i;
- for (i=0; i<32; i++)
- tasklet_init(bh_task_vec+i, bh_action, i);
+ for (i=0; i<32; i++)
+ tasklet_init(bh_task_vec+i, bh_action, i);
- open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
- open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
+ open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
+ open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}
void __run_task_queue(task_queue *list)
{
- struct list_head head, *next;
- unsigned long flags;
-
- spin_lock_irqsave(&tqueue_lock, flags);
- list_add(&head, list);
- list_del_init(list);
- spin_unlock_irqrestore(&tqueue_lock, flags);
-
- next = head.next;
- while (next != &head) {
- void (*f) (void *);
- struct tq_struct *p;
- void *data;
-
- p = list_entry(next, struct tq_struct, list);
- next = next->next;
- f = p->routine;
- data = p->data;
- wmb();
- p->sync = 0;
- if (f)
- f(data);
- }
+ struct list_head head, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tqueue_lock, flags);
+ list_add(&head, list);
+ list_del_init(list);
+ spin_unlock_irqrestore(&tqueue_lock, flags);
+
+ next = head.next;
+ while (next != &head) {
+ void (*f) (void *);
+ struct tq_struct *p;
+ void *data;
+
+ p = list_entry(next, struct tq_struct, list);
+ next = next->next;
+ f = p->routine;
+ data = p->data;
+ wmb();
+ p->sync = 0;
+ if (f)
+ f(data);
+ }
}
s_time_t expires; /* system time time out value */
unsigned long data;
void (*function)(unsigned long);
+ int cpu;
};
/* interface for "clients" */
-extern int add_ac_timer(struct ac_timer *timer);
-extern int rem_ac_timer(struct ac_timer *timer);
-extern int mod_ac_timer(struct ac_timer *timer, s_time_t new_time);
-static inline void init_ac_timer(struct ac_timer *timer)
+extern void add_ac_timer(struct ac_timer *timer);
+extern void rem_ac_timer(struct ac_timer *timer);
+extern void mod_ac_timer(struct ac_timer *timer, s_time_t new_time);
+static __inline__ void init_ac_timer(struct ac_timer *timer, int cpu)
{
+ timer->cpu = cpu;
timer->timer_list.next = NULL;
}
+/* check if ac_timer is active, i.e., on the list */
+static __inline__ int active_ac_timer(struct ac_timer *timer)
+{
+ return (timer->timer_list.next != NULL);
+}
/* interface used by programmable timer, implemented hardware dependent */
extern int reprogram_ac_timer(s_time_t timeout);
#define _LINUX_INTERRUPT_H
#include <linux/config.h>
-//#include <linux/kernel.h>
+#include <linux/lib.h>
#include <linux/smp.h>
#include <linux/cache.h>
enum
{
HI_SOFTIRQ=0,
+ AC_TIMER_SOFTIRQ,
TASKLET_SOFTIRQ
};
asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
extern void softirq_init(void);
-#define __cpu_raise_softirq(cpu, nr) do { softirq_pending(cpu) |= 1UL << (nr); } while (0)
+#define __cpu_raise_softirq(cpu, nr) set_bit(nr, &softirq_pending(cpu))
extern void FASTCALL(cpu_raise_softirq(unsigned int cpu, unsigned int nr));
extern void FASTCALL(raise_softirq(unsigned int nr));